MPCS
MPCS(Multi-Processor Critical Section)实现了 GPP 和 DSP 对共享数据结构的互斥访问。应用程序有时需要定义属于自己的、能够被多个处理器访问的数据结构,用于多处理器间的信息通信;但应用程序必须保证某个或多个处理器上的各个任务都能互斥地访问这些数据结构,以保证数据的一致性。MPCS 正是用于解决这个问题。
MPCS组件提供了接口来创建和删除MPCS实体,每个MPCS实体都通过一个系统唯一的字符串来标识。每一个需要使用MPCS的客户端都需要调用API打开函数来获取句柄。不使用MPCS时,用相应API函数来关闭句柄。
如果MPCS对象要求的存储空间由用户提供的话,则它必须位于所有处理器都可以访问的POOL中,如果在创建对象的时候不提供存储空间,则指定池的ID号将被MPCS对象用来分配内部空间。
MPCSXFER 示例演示了通过一个带互斥访问保护的共享内存缓冲区,在 GPP 端与 DSP 端之间传递和转换数据的基本机制。它使用 MPCS 组件,为由 POOL 组件分配的共享缓冲区提供访问保护;GPP 与 DSP 两端应用程序的同步则通过 NOTIFY 组件实现。DSP 端应用程序在一个 TSK 任务中使用 MPCS、POOL 和 NOTIFY 组件完成上述工作。
--------------------------------------GPP端------------------------------------------
status = MPCSXFER_Create (dspExecutable,strBufferSize,strNumIterations,processorId) ;
status = PROC_setup (NULL) ;
status = PROC_attach (processorId, NULL) ;
status = POOL_open (POOL_makePoolId(processorId, SAMPLE_POOL_ID),&poolAttrs) ;
status = POOL_alloc (POOL_makePoolId(processorId,SAMPLE_POOL_ID),(Void**)&MPCSXFER_CtrlBuf,DSPLINK_ALIGN(sizeof(MPCSXFER_Ctrl),DSPLINK_BUF_ALIGN)) ;
status = POOL_translateAddr (POOL_makePoolId(processorId, SAMPLE_POOL_ID),&dspCtrlBuf,AddrType_Dsp,(Void *) MPCSXFER_CtrlBuf,AddrType_Usr) ;
status = POOL_alloc (POOL_makePoolId(processorId, SAMPLE_POOL_ID),(Void **) &MPCSXFER_DataBuf,MPCSXFER_BufferSize) ;
//获取DSP地址以进行传输
status = POOL_translateAddr (POOL_makePoolId(processorId, SAMPLE_POOL_ID),&dspDataBuf,AddrType_Dsp,(Void *) MPCSXFER_DataBuf,AddrType_Usr)
/* Create the MPCS object that guards the shared control and data buffers. */
status = MPCS_create (processorId, MPCSXFER_MpcsObjName, NULL, &mpcsAttrs) ;

/* Open a GPP-side handle to the MPCS object. */
status = MPCS_open (processorId, MPCSXFER_MpcsObjName, &MPCSXFER_MpcsHandle) ;

/* Write the initialized shared control structure back to memory so the
 * DSP sees a coherent copy. */
POOL_writeback (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                MPCSXFER_CtrlBuf,
                DSPLINK_ALIGN (sizeof (MPCSXFER_Ctrl), DSPLINK_BUF_ALIGN)) ;

/* Create a semaphore used to wait for notifications from the DSP. */
status = MPCSXFER_CreateSem (&MPCSXFER_SemPtr) ;

/* Register for the IPS event the DSP raises once its setup is complete. */
status = NOTIFY_register (processorId,
                          MPCSXFER_IPS_ID,
                          MPCSXFER_IPS_EVENTNO,
                          (FnNotifyCbck) MPCSXFER_Notify,
                          MPCSXFER_SemPtr) ;

/* Load the DSP executable image and start the DSP running. */
status = PROC_load (processorId, (Char8 *) &imageInfo, numArgs, args) ;
status = PROC_start (processorId) ;

/* Block until the DSP application signals (via an IPS event) that it has
 * finished its setup and is ready for further execution. */
status = MPCSXFER_WaitSem (MPCSXFER_SemPtr) ;
/* 向DSP发送应用程序将会使用的关于控制结构体地址和数据缓冲的通知 */
1 if (DSP_SUCCEEDED (status)) { 2 status = NOTIFY_notify (processorId, 3 MPCSXFER_IPS_ID, 4 MPCSXFER_IPS_EVENTNO, 5 (Uint32) dspCtrlBuf) ; 6 if (DSP_SUCCEEDED (status)) { 7 status = NOTIFY_notify (processorId, 8 MPCSXFER_IPS_ID, 9 MPCSXFER_IPS_EVENTNO, 10 (Uint32) dspDataBuf) ; 11 if (DSP_FAILED (status)) { 12 MPCSXFER_1Print ("NOTIFY_notify () DataBuf failed." 13 " Status = [0x%x]\n", 14 status) ; 15 } 16 } 17 else { 18 MPCSXFER_1Print ("NOTIFY_notify () CtrlBuf failed." 19 " Status = [0x%x]\n", 20 status) ; 21 } 22
/* Run the GPP-side transfer loop. */
status = MPCSXFER_Execute (MPCSXFER_NumIterations, processorId) ;

/* Enter the critical section protecting the shared control/data buffers. */
status = MPCS_enter (MPCSXFER_MpcsHandle) ;

/* Invalidate cached copies so the GPP reads what the DSP last wrote. */
POOL_invalidate (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                 MPCSXFER_CtrlBuf,
                 DSPLINK_ALIGN (sizeof (MPCSXFER_Ctrl), DSPLINK_BUF_ALIGN)) ;
POOL_invalidate (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                 MPCSXFER_DataBuf,
                 MPCSXFER_BufferSize) ;

/* The buffer may be consumed if the DSP filled it last, or if nobody has
 * claimed it yet (invalid owner ID means first iteration). */
if (   (MPCSXFER_CtrlBuf->procId == processorId)
    || (MPCSXFER_CtrlBuf->procId == MPCSXFER_INVALID_ID)) {
    if (MPCSXFER_CtrlBuf->procId == processorId) {
        /* Verify correctness of DSP data received. */
        status = MPCSXFER_VerifyData (MPCSXFER_CtrlBuf,
                                      MPCSXFER_DataBuf,
                                      dspIter) ;
        if (DSP_SUCCEEDED (status)) {
            dspIter++ ;
        }
    }

    /* Claim the buffer and fill it with GPP data. */
    if (DSP_SUCCEEDED (status)) {
        /* Take control of the buffer. */
        MPCSXFER_CtrlBuf->procId = ID_GPP ;
        MPCSXFER_CtrlBuf->iterNo = gppIter ;

        /* Fill the data buffer with the GPP stamp.
         * NOTE(review): loop bound is BufferSize / 2 — presumably the
         * elements are 16-bit; confirm against MPCSXFER_DataBuf's type. */
        for (i = 0 ; i < (MPCSXFER_BufferSize / 2) ; i++) {
            MPCSXFER_DataBuf [i] = MPCSXFER_GPP_STAMP ;
        }

        /* Increment the GPP iteration number for next time. */
        gppIter++ ;
    }
}
else if (MPCSXFER_CtrlBuf->procId == ID_GPP) {
    /* Do nothing if the buffer is already filled by the GPP. Some
     * other processing can be done here if required.
     * Sleep for some time to simulate this. */
    MPCSXFER_Sleep (10) ;
}

/* Write the modified control/data buffers back before releasing the lock. */
POOL_writeback (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                MPCSXFER_CtrlBuf,
                DSPLINK_ALIGN (sizeof (MPCSXFER_Ctrl), DSPLINK_BUF_ALIGN)) ;
POOL_writeback (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                MPCSXFER_DataBuf,
                MPCSXFER_BufferSize) ;

/* Leave the critical section protecting the shared buffers. */
tmpStatus = MPCS_leave (MPCSXFER_MpcsHandle) ;
/* GPP-side teardown, in reverse order of creation. */
MPCSXFER_Delete (processorId) ;

/* Stop the DSP. */
status = PROC_stop (processorId) ;

/* Unregister the setup-complete notification and delete its semaphore. */
tmpStatus = NOTIFY_unregister (processorId,
                               MPCSXFER_IPS_ID,
                               MPCSXFER_IPS_EVENTNO,
                               (FnNotifyCbck) MPCSXFER_Notify,
                               MPCSXFER_SemPtr) ;
tmpStatus = MPCSXFER_DeleteSem (MPCSXFER_SemPtr) ;

/* Close this side's MPCS handle, then delete the MPCS object itself.
 * NOTE(review): the original snippet was missing the ';' after MPCS_delete. */
tmpStatus = MPCS_close (processorId, MPCSXFER_MpcsHandle) ;
tmpStatus = MPCS_delete (processorId, MPCSXFER_MpcsObjName) ;

/* Return the shared data and control buffers to the pool. */
tmpStatus = POOL_free (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                       (Void *) MPCSXFER_DataBuf,
                       MPCSXFER_BufferSize) ;
tmpStatus = POOL_free (POOL_makePoolId (processorId, SAMPLE_POOL_ID),
                       (Void *) MPCSXFER_CtrlBuf,
                       DSPLINK_ALIGN (sizeof (MPCSXFER_Ctrl), DSPLINK_BUF_ALIGN)) ;

/* Close the pool, detach from the DSP, and finalize the PROC component. */
tmpStatus = POOL_close (POOL_makePoolId (processorId, SAMPLE_POOL_ID)) ;
tmpStatus = PROC_detach (processorId) ;
tmpStatus = PROC_destroy () ;
--------------------------------------DSP端------------------------------------------
/* Initialize DSPLink on the DSP side before any other DSPLink call. */
DSPLINK_init () ;
/* Create the DSP/BIOS task that runs the transfer demo. */
tskMpcsXferTask = TSK_create (tskMpcsXfer, NULL, 0) ;
/* Allocate and initialize the task's transfer-info structure. */
status = TSKMPCSXFER_create (&info) ;
/* Open pool 0 — presumably the same pool the GPP opened as
 * SAMPLE_POOL_ID; confirm against the GPP-side configuration. */
status = POOL_open (0, &poolObj) ;
/* 分配数据结构体 */
1 if (status == SYS_OK) { 2 *infoPtr = MEM_calloc (DSPLINK_SEGID, 3 sizeof (TSKMPCSXFER_TransferInfo), 4 0) ; /* No alignment restriction */ 5 if (*infoPtr == NULL) { 6 status = SYS_EALLOC ; 7 SET_FAILURE_REASON (status) ; 8 } 9 else { 10 info = *infoPtr ; 11 } 12 }
/* 填充传输信息结构体 */
1 if (status == SYS_OK) { 2 info->mpcsHandle = NULL ; /* Initialized in MPCS_open (). */ 3 info->dataBuf = NULL ; /* Set through notification callback. */ 4 info->ctrlBuf = NULL ; /* Set through notification callback. */ 5 info->numIterations = MPCSXFER_NumIterations ; 6 info->bufferSize = MPCSXFER_BufferSize ; 7 SEM_new (&(info->notifySemObj), 0) ; 8 }
/* Open the MPCS object created by the GPP and store its handle. */
status = MPCS_open (ID_GPP, MPCSXFER_MPCSOBJ_NAME, &(info->mpcsHandle)) ;

/* Register a callback that will receive the GPP-side control- and
 * data-buffer pointers via IPS events. */
status = NOTIFY_register (ID_GPP,
                          MPCSXFER_IPS_ID,
                          MPCSXFER_IPS_EVENTNO,
                          (FnNotifyCbck) TSKMPCSXFER_notify,
                          info) ;

/* Tell the GPP that DSP-side setup is complete and it may proceed. */
status = NOTIFY_notify (ID_GPP,
                        MPCSXFER_IPS_ID,
                        MPCSXFER_IPS_EVENTNO,
                        (Uint32) 0) ;

/* Wait for the GPP's first callback, which delivers the control-buffer
 * pointer (the callback posts the semaphore)... */
semStatus = SEM_pend (&(info->notifySemObj), SYS_FOREVER) ;

/* ...and a second time for the data-buffer pointer. */
semStatus = SEM_pend (&(info->notifySemObj), SYS_FOREVER) ;
status = TSKMPCSXFER_execute (info) ;
/* 开始保护性共享控制和数据缓冲 */status = MPCS_enter (info->mpcsHandle) ;
/* 验证为控制器和数据缓冲的高速缓存 */
1 HAL_cacheInv ((Ptr) info->ctrlBuf, 2 DSPLINK_ALIGN (sizeof (MPCSXFER_Ctrl), 3 DSPLINK_BUF_ALIGN)) ; 4 HAL_cacheInv ((Ptr) info->dataBuf, info->bufferSize) ; 5 if ( (info->ctrlBuf->procId == ID_GPP) 6 || (info->ctrlBuf->procId == MPCSXFER_INVALID_ID)) { 7 if (info->ctrlBuf->procId == ID_GPP) { 8 /* Verify correctness of GPP data received. */ 9 status = MPCSXFER_verifyData (info->ctrlBuf, 10 info->dataBuf, 11 gppIter) ; 12 if (status == SYS_OK) { 13 info->ctrlBuf->dataVerify = (Uint32) TRUE ; 14 gppIter++ ; 15 } 16 else { 17 info->ctrlBuf->dataVerify = (Uint32) FALSE ; 18 } 19 }
/* 要求缓冲填充DSP数据 */
1 /* Take control of the buffer. */ 2 info->ctrlBuf->procId = dspId ; 3 info->ctrlBuf->iterNo = dspIter ; 4 5 /* Fill the data buffer with the DSP stamp. */ 6 for (i = 0 ; i < (info->bufferSize / 2) ; i++) { 7 info->dataBuf [i] = MPCSXFER_DSP_STAMP ; 8 } 9 10 /* Increment the DSP iteration number for next time. */ 11 dspIter++ ;
/* 回写控制和数据缓冲的内容到高速缓存中 */
1 HAL_cacheWbInv ((Ptr) info->ctrlBuf, 2 DSPLINK_ALIGN (sizeof (MPCSXFER_Ctrl), 3 DSPLINK_BUF_ALIGN)) ; 4 HAL_cacheWbInv ((Ptr) info->dataBuf, info->bufferSize) ;
tmpStatus = MPCS_leave (info->mpcsHandle) ;
/* DSP-side teardown: delete the task state, unregister the notification
 * callback, close the MPCS handle, and free the transfer-info structure. */
status = TSKMPCSXFER_delete (info) ;

status = NOTIFY_unregister (ID_GPP,
                            MPCSXFER_IPS_ID,
                            MPCSXFER_IPS_EVENTNO,
                            (FnNotifyCbck) TSKMPCSXFER_notify,
                            info) ;

tmpStatus = MPCS_close (ID_GPP, info->mpcsHandle) ;

freeStatus = MEM_free (DSPLINK_SEGID,
                       info,
                       sizeof (TSKMPCSXFER_TransferInfo)) ;
OMAPL138学习----DSPLINK DEMO解析之MPCSXFER,布布扣,bubuko.com
OMAPL138学习----DSPLINK DEMO解析之MPCSXFER
原文:http://www.cnblogs.com/zxycele/p/3628907.html